y_type, y_true, y_pred = _check_targets(y_true, y_pred)


File "C:\Users\tensor19\Anaconda3\envs\kids\lib\site-packages\sklearn\metrics\classification.py", line 88, in _check_targets

raise ValueError("{0} is not supported".format(y_type))


ValueError: unknown is not supported



In [50]:


In [50]: len(X_train

File "<ipython-input-50-bc2a6c718034>", line 1

len(X_train

^

SyntaxError: unexpected EOF while parsing



In [51]:


In [51]: len(X_train)

Out[51]: 4492


def plot_sample(X, y, preds, binary_preds, ix=None):
    """Show one sample: image, ground-truth mask, raw and thresholded prediction.

    Parameters
    ----------
    X : array, shape (n, H, W, 1) -- input images (channel-last) -- TODO confirm shape
    y : array -- ground-truth masks, one per image
    preds : array -- model sigmoid outputs in [0, 1]
    binary_preds : array -- thresholded 0/1 predictions
    ix : int, optional -- sample index; drawn at random when None
    """
    if ix is None:
        # Fix: random.randint is inclusive on BOTH ends, so len(X) itself
        # could be drawn and raise IndexError; use len(X) - 1.
        ix = random.randint(0, len(X) - 1)

    # Any positive pixel in the mask -> overlay its contour on the panels.
    has_mask = y[ix].max() > 0

    fig, ax = plt.subplots(1, 4, figsize=(20, 10))

    ax[0].imshow(X[ix, ..., 0], cmap='seismic')
    if has_mask:
        ax[0].contour(y[ix].squeeze(), colors='k', levels=[0.5])
    ax[0].set_title('Image')

    ax[1].imshow(y[ix].squeeze())
    ax[1].set_title('cancer')

    ax[2].imshow(preds[ix].squeeze(), vmin=0, vmax=1)
    if has_mask:
        ax[2].contour(y[ix].squeeze(), colors='k', levels=[0.5])
    ax[2].set_title('Image')

    ax[3].imshow(binary_preds[ix].squeeze(), vmin=0, vmax=1)
    if has_mask:
        ax[3].contour(y[ix].squeeze(), colors='k', levels=[0.5])
    ax[3].set_title('Cancer Predicted');


def plot_sample(X, y, preds, binary_preds, ix=None):
    """Show one sample: image, annotation, raw and thresholded prediction.

    Parameters
    ----------
    X : array, shape (n, H, W, 1) -- input images (channel-last) -- TODO confirm shape
    y : array -- ground-truth masks, one per image
    preds : array -- model sigmoid outputs in [0, 1]
    binary_preds : array -- thresholded 0/1 predictions
    ix : int, optional -- sample index; drawn at random when None
    """
    if ix is None:
        # Fix: random.randint is inclusive on BOTH ends, so len(X) itself
        # could be drawn and raise IndexError; use len(X) - 1.
        ix = random.randint(0, len(X) - 1)

    # Any positive pixel in the mask -> overlay its contour on the panels.
    has_mask = y[ix].max() > 0

    fig, ax = plt.subplots(1, 4, figsize=(20, 10))

    ax[0].imshow(X[ix, ..., 0], cmap='seismic')
    if has_mask:
        ax[0].contour(y[ix].squeeze(), colors='k', levels=[0.5])
    ax[0].set_title('Image')

    ax[1].imshow(y[ix].squeeze())
    ax[1].set_title('Annotation')

    ax[2].imshow(preds[ix].squeeze(), vmin=0, vmax=1)
    if has_mask:
        ax[2].contour(y[ix].squeeze(), colors='k', levels=[0.5])
    ax[2].set_title('Prediction')

    ax[3].imshow(binary_preds[ix].squeeze(), vmin=0, vmax=1)
    if has_mask:
        ax[3].contour(y[ix].squeeze(), colors='k', levels=[0.5])
    ax[3].set_title('Threshold Prediction');


In [54]: count = 0

    ...: for i in len(X_train):

    ...: plot_sample(X_train, y_train, preds_train, preds_train_t, ix=i)

    ...: count = count + 1

    ...:

    ...: if count == 10:

    ...: break

Traceback (most recent call last):


File "<ipython-input-54-ae95b2a4cce4>", line 2, in <module>

for i in len(X_train):


TypeError: 'int' object is not iterable



In [55]:


In [55]: count = 0

    ...: for i in range(len(X_train)):

    ...: plot_sample(X_train, y_train, preds_train, preds_train_t, ix=i)

    ...: count = count + 1

    ...:

    ...: if count == 10:

    ...: break

Traceback (most recent call last):


File "<ipython-input-55-a51bcac3dc06>", line 3, in <module>

plot_sample(X_train, y_train, preds_train, preds_train_t, ix=i)


NameError: name 'preds_train_t' is not defined



In [56]:


In [56]: preds_train_t = (preds_train > 0.5).astype(np.uint8)


In [57]: preds_val_t = (preds_val > 0.5).astype(np.uint8)


# Preview the first 10 training samples against their predictions.
# The original walked the whole range with a manual counter and `break`;
# a bounded range says the same thing directly and also handles
# len(X_train) < 10 without a dangling counter.
for i in range(min(10, len(X_train))):
    plot_sample(X_train, y_train, preds_train, preds_train_t, ix=i)












In [59]: import os

    ...: import random

    ...: import pandas as pd

    ...: import numpy as np

    ...: import matplotlib.pyplot as plt

    ...: plt.style.use("ggplot")

    ...:

    ...:

    ...: from tqdm import tqdm_notebook, tnrange

    ...: from itertools import chain

    ...: from skimage.io import imread, imshow, concatenate_images

    ...: from skimage.transform import resize

    ...: from skimage.morphology import label

    ...: from sklearn.model_selection import train_test_split

    ...:

    ...: import tensorflow as tf

    ...:

    ...: from keras.models import Model, load_model

    ...: from keras.layers import Input, BatchNormalization, Activation, Dense, Dropout

    ...: from keras.layers.core import Lambda, RepeatVector, Reshape

    ...: from keras.layers.convolutional import Conv2D, Conv2DTranspose

    ...: from keras.layers.pooling import MaxPooling2D, GlobalMaxPool2D

    ...: from keras.layers.merge import concatenate, add

    ...: from keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau

    ...: from keras.optimizers import Adam

    ...: from keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array, load_img

    ...: import tensorflow as tf

    ...:

    ...: X_train, X_valid, y_train, y_valid = train_test_split(Image_data, Target_data, test_size=0.20, random_state=2018)

    ...:

    ...:

    ...:

def conv2d_block(input_tensor, n_filters, kernel_size=3, batchnorm=True):
    """Two stacked Conv2D layers with optional BatchNorm and ReLU after each.

    Parameters
    ----------
    input_tensor : Keras tensor to convolve
    n_filters : int -- number of filters in both convolutions
    kernel_size : int -- square kernel side length (default 3)
    batchnorm : bool -- insert BatchNormalization before each ReLU

    Returns the output Keras tensor.
    """
    def _conv_relu(t):
        # Same-padding convolution with He-normal initialisation,
        # optionally batch-normalised, then ReLU.
        t = Conv2D(filters=n_filters,
                   kernel_size=(kernel_size, kernel_size),
                   kernel_initializer="he_normal",
                   padding="same")(t)
        if batchnorm:
            t = BatchNormalization()(t)
        return Activation("relu")(t)

    # Apply the conv->[BN]->ReLU unit twice, exactly as the two
    # hand-written layers did.
    return _conv_relu(_conv_relu(input_tensor))

    ...:

    ...:

    ...:

    ...:

def get_unet(input_img, n_filters=16, dropout=0.5, batchnorm=True):
    """Build a 4-level U-Net with a sigmoid 1-channel output.

    Parameters
    ----------
    input_img : Keras Input tensor (H, W, 1)
    n_filters : int -- base filter count, doubled at each level
    dropout : float -- dropout rate (first level uses half of it)
    batchnorm : bool -- forwarded to conv2d_block

    Returns the compiled-ready Keras Model.
    """
    def _down(t, filters, drop):
        # One contracting step: conv block, 2x2 max-pool, dropout.
        # Returns (pre-pool features for the skip connection, pooled output).
        c = conv2d_block(t, n_filters=filters, kernel_size=3, batchnorm=batchnorm)
        p = MaxPooling2D((2, 2))(c)
        return c, Dropout(drop)(p)

    def _up(t, skip, filters, **cat_kwargs):
        # One expansive step: transposed conv upsample, concat with the
        # matching encoder features, dropout, conv block.
        u = Conv2DTranspose(filters, (3, 3), strides=(2, 2), padding='same')(t)
        u = concatenate([u, skip], **cat_kwargs)
        u = Dropout(dropout)(u)
        return conv2d_block(u, n_filters=filters, kernel_size=3, batchnorm=batchnorm)

    # Contracting path; the first level uses half the dropout rate,
    # matching the original layer-by-layer construction.
    c1, p1 = _down(input_img, n_filters * 1, dropout * 0.5)
    c2, p2 = _down(p1, n_filters * 2, dropout)
    c3, p3 = _down(p2, n_filters * 4, dropout)
    c4, p4 = _down(p3, n_filters * 8, dropout)

    # Bottleneck.
    c5 = conv2d_block(p4, n_filters=n_filters * 16, kernel_size=3, batchnorm=batchnorm)

    # Expansive path with skip connections (the last concat pinned
    # axis=3 in the original, preserved here).
    c6 = _up(c5, c4, n_filters * 8)
    c7 = _up(c6, c3, n_filters * 4)
    c8 = _up(c7, c2, n_filters * 2)
    c9 = _up(c8, c1, n_filters * 1, axis=3)

    outputs = Conv2D(1, (1, 1), activation='sigmoid')(c9)
    model = Model(inputs=[input_img], outputs=[outputs])
    return model

    ...:

    ...:

    ...:

    ...:

    ...: im_width = 320

    ...: im_height = 320



# Build and compile the U-Net for single-channel 320x320 inputs.
input_img = Input((im_height, im_width, 1), name='img')
model = get_unet(input_img, n_filters=16, dropout=0.05, batchnorm=True)

model.compile(optimizer=Adam(), loss="mean_squared_error", metrics=["accuracy"])
# model.summary()

# Fix: Windows paths must be raw strings. In a plain string "\b", "\t",
# "\n", ... are escape sequences -- the later load_weights OSError
# ('E:\kits19\checkpointianry_crossentropy\...') was caused by exactly
# this, '\b' silently becoming a backspace character.
checkpoint_path = r"E:\kits19\checkpoints\cp-{epoch:04d}.ckpt"
checkpoint_dir = os.path.dirname(checkpoint_path)


# Training callbacks, in the order they were originally declared.
callbacks = [
    # Stop when the monitored loss has not improved for 3 epochs.
    EarlyStopping(patience=3, verbose=1),
    # Cut the learning rate 10x on plateau, never below 1e-5.
    ReduceLROnPlateau(factor=0.1, patience=3, min_lr=0.00001, verbose=1),
    # Persist weights-only checkpoints every epoch.
    ModelCheckpoint(checkpoint_path, verbose=1, save_weights_only=True, period=1),
]


In [62]: loss, acc = model.evaluate(X_valid, y_valid)

    ...: print("Trained model, loss: {:5.2f}% , accuracy: {:5.2f}%".format(100*loss, 100*acc))

1123/1123 [==============================] - 403s 359ms/step

Trained model, loss: 72.43% , accuracy: 29.32%


In [63]: os.listdir(checkpoint_dir)

Out[63]:

['bianry_crossentropy',

'cp-0001.ckpt',

'cp-0002.ckpt',

'cp-0003.ckpt',

'cp-0004.ckpt',

'cp-0005.ckpt',

'cp-0006.ckpt',

'cp-0007.ckpt',

'cp-0008.ckpt',

'cp-0009.ckpt',

'cp-0010.ckpt',

'cp-0011.ckpt',

'mean square value_stop_due_to_learning_rate']


In [64]: latest = 'E:\kits19\checkpoints\bianry_crossentropy\cp-0006.ckpt'


In [65]: model.load_weights(latest)

Traceback (most recent call last):


File "<ipython-input-65-e531f1a34d9e>", line 1, in <module>

model.load_weights(latest)


File "C:\Users\tensor19\Anaconda3\envs\kids\lib\site-packages\keras\engine\network.py", line 1157, in load_weights

with h5py.File(filepath, mode='r') as f:


File "C:\Users\tensor19\Anaconda3\envs\kids\lib\site-packages\h5py\_hl\files.py", line 312, in __init__

fid = make_fid(name, mode, userblock_size, fapl, swmr=swmr)


File "C:\Users\tensor19\Anaconda3\envs\kids\lib\site-packages\h5py\_hl\files.py", line 142, in make_fid

fid = h5f.open(name, flags, fapl=fapl)


File "h5py\_objects.pyx", line 54, in h5py._objects.with_phil.wrapper


File "h5py\_objects.pyx", line 55, in h5py._objects.with_phil.wrapper


File "h5py\h5f.pyx", line 78, in h5py.h5f.open


OSError: Unable to open file (unable to open file: name = 'E:\kits19\checkpointianry_crossentropy\cp-0006.ckpt', errno = 22, error message = 'Invalid argument', flags = 0, o_flags = 0)



In [66]:


In [66]: latest = 'E:\kits19\checkpoints\bianry_crossentropy\cp-0006.ckpt'


In [67]: model.load_weights(latest)

Traceback (most recent call last):


File "<ipython-input-67-e531f1a34d9e>", line 1, in <module>

model.load_weights(latest)


File "C:\Users\tensor19\Anaconda3\envs\kids\lib\site-packages\keras\engine\network.py", line 1157, in load_weights

with h5py.File(filepath, mode='r') as f:


File "C:\Users\tensor19\Anaconda3\envs\kids\lib\site-packages\h5py\_hl\files.py", line 312, in __init__

fid = make_fid(name, mode, userblock_size, fapl, swmr=swmr)


File "C:\Users\tensor19\Anaconda3\envs\kids\lib\site-packages\h5py\_hl\files.py", line 142, in make_fid

fid = h5f.open(name, flags, fapl=fapl)


File "h5py\_objects.pyx", line 54, in h5py._objects.with_phil.wrapper


File "h5py\_objects.pyx", line 55, in h5py._objects.with_phil.wrapper


File "h5py\h5f.pyx", line 78, in h5py.h5f.open


OSError: Unable to open file (unable to open file: name = 'E:\kits19\checkpointianry_crossentropy\cp-0006.ckpt', errno = 22, error message = 'Invalid argument', flags = 0, o_flags = 0)



In [68]:


In [68]: latest = 'E:\kits19\checkpoints\cp-0006.ckpt'


In [69]: model.load_weights(latest)


In [70]: loss, acc = model.evaluate(X_valid, y_valid)

    ...: print("Restored model, loss: {:5.2f}% , accuracy: {:5.2f}%".format(100*loss, 100*acc))

1123/1123 [==============================] - 393s 350ms/step

Restored model, loss: 4.03% , accuracy: 96.02%


# Run inference on both splits, then binarise the sigmoid outputs.
preds_train = model.predict(X_train, verbose=1)
preds_val = model.predict(X_valid, verbose=1)

# Threshold predictions at 0.5 into uint8 {0, 1} masks.
preds_train_t = (preds_train > 0.5).astype(np.uint8)
preds_val_t = (preds_val > 0.5).astype(np.uint8)

4492/4492 [==============================] - 1555s 346ms/step

1123/1123 [==============================] - 394s 350ms/step


def plot_sample(X, y, preds, binary_preds, ix=None):
    """Show one sample: image, annotation, raw and thresholded prediction.

    Parameters
    ----------
    X : array, shape (n, H, W, 1) -- input images (channel-last) -- TODO confirm shape
    y : array -- ground-truth masks, one per image
    preds : array -- model sigmoid outputs in [0, 1]
    binary_preds : array -- thresholded 0/1 predictions
    ix : int, optional -- sample index; drawn at random when None
    """
    if ix is None:
        # Fix: random.randint is inclusive on BOTH ends, so len(X) itself
        # could be drawn and raise IndexError; use len(X) - 1.
        ix = random.randint(0, len(X) - 1)

    # Any positive pixel in the mask -> overlay its contour on the panels.
    has_mask = y[ix].max() > 0

    fig, ax = plt.subplots(1, 4, figsize=(20, 10))

    ax[0].imshow(X[ix, ..., 0], cmap='seismic')
    if has_mask:
        ax[0].contour(y[ix].squeeze(), colors='k', levels=[0.5])
    ax[0].set_title('Image')

    ax[1].imshow(y[ix].squeeze())
    ax[1].set_title('Annotation')

    ax[2].imshow(preds[ix].squeeze(), vmin=0, vmax=1)
    if has_mask:
        ax[2].contour(y[ix].squeeze(), colors='k', levels=[0.5])
    ax[2].set_title('Prediction')

    ax[3].imshow(binary_preds[ix].squeeze(), vmin=0, vmax=1)
    if has_mask:
        ax[3].contour(y[ix].squeeze(), colors='k', levels=[0.5])
    ax[3].set_title('Threshold Prediction');


# Preview the first 10 training samples against their predictions.
# The original walked the whole range with a manual counter and `break`;
# a bounded range says the same thing directly and also handles
# len(X_train) < 10 without a dangling counter.
for i in range(min(10, len(X_train))):
    plot_sample(X_train, y_train, preds_train, preds_train_t, ix=i)












In [74]: preds_train_t = (preds_train > 0.6).astype(np.uint8)

    ...: preds_val_t = (preds_val > 0.6).astype(np.uint8)


In [75]: count = 0

    ...: for i in range(len(X_train)):

    ...: plot_sample(X_train, y_train, preds_train, preds_train_t, ix=i)

    ...: count = count + 1

    ...:

    ...: if count == 10:

    ...: break












In [76]: preds_train_t = (preds_train > 0.9).astype(np.uint8)

    ...: preds_val_t = (preds_val > 0.9).astype(np.uint8)


In [77]: count = 0

    ...: for i in range(len(X_train)):

    ...: plot_sample(X_train, y_train, preds_train, preds_train_t, ix=i)

    ...: count = count + 1

    ...:

    ...: if count == 10:

    ...: break












In [78]: count = 0

    ...: for i in range(len(X_train)):

    ...: plot_sample(X_valid, y_valid, preds_val, preds_val_t, ix=200)

    ...: count = count + 1

    ...:

    ...: if count == 10:

    ...: break












In [79]: